From 1f698f3f9f667540ee8256dc05dc41641b8859e7 Mon Sep 17 00:00:00 2001
From: "kfraser@localhost.localdomain" <kfraser@localhost.localdomain>
Date: Wed, 20 Jun 2007 16:18:03 +0100
Subject: [PATCH] x86: introduce specialized clear_page()

More than doubles performance of page clearing on not too old
processors (SSE2 supported).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
---
 xen/arch/x86/Makefile        |  1 +
 xen/arch/x86/clear_page.S    | 26 ++++++++++++++++++++++++++
 xen/arch/x86/domain.c        |  9 ++++++---
 xen/arch/x86/x86_64/Makefile |  2 +-
 xen/arch/x86/x86_64/mm.c     |  9 ++++++---
 xen/include/asm-x86/page.h   | 11 ++++++++---
 6 files changed, 48 insertions(+), 10 deletions(-)
 create mode 100644 xen/arch/x86/clear_page.S

diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index c7ccbda0d5..3899874714 100644
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -10,6 +10,7 @@ subdir-$(x86_64) += x86_64
 
 obj-y += apic.o
 obj-y += bitops.o
+obj-y += clear_page.o
 obj-y += compat.o
 obj-y += delay.o
 obj-y += dmi_scan.o
diff --git a/xen/arch/x86/clear_page.S b/xen/arch/x86/clear_page.S
new file mode 100644
index 0000000000..d3e8bba915
--- /dev/null
+++ b/xen/arch/x86/clear_page.S
@@ -0,0 +1,26 @@
+#include <xen/config.h>
+#include <asm/page.h>
+
+#ifdef __i386__
+#define ptr_reg %edx
+#else
+#define ptr_reg %rdi
+#endif
+
+ENTRY(clear_page_sse2)
+#ifdef __i386__
+        mov     4(%esp), ptr_reg
+#endif
+        mov     $PAGE_SIZE/16, %ecx
+        xor     %eax,%eax
+
+0:      dec     %ecx
+        movnti  %eax, (ptr_reg)
+        movnti  %eax, 4(ptr_reg)
+        movnti  %eax, 8(ptr_reg)
+        movnti  %eax, 12(ptr_reg)
+        lea     16(ptr_reg), ptr_reg
+        jnz     0b
+
+        sfence
+        ret
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 79dc4baa37..bf83032a29 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -151,7 +151,8 @@ int setup_arg_xlat_area(struct vcpu *v, l4_pgentry_t *l4tab)
         pg = alloc_domheap_page(NULL);
         if ( !pg )
             return -ENOMEM;
-        d->arch.mm_arg_xlat_l3 = clear_page(page_to_virt(pg));
+        d->arch.mm_arg_xlat_l3 = page_to_virt(pg);
+        clear_page(d->arch.mm_arg_xlat_l3);
     }
 
     l4tab[l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
@@ -444,7 +445,8 @@ int arch_domain_create(struct domain *d)
 
     if ( (pg = alloc_domheap_page(NULL)) == NULL )
         goto fail;
-    d->arch.mm_perdomain_l2 = clear_page(page_to_virt(pg));
+    d->arch.mm_perdomain_l2 = page_to_virt(pg);
+    clear_page(d->arch.mm_perdomain_l2);
     for ( i = 0; i < (1 << pdpt_order); i++ )
         d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)+i] =
             l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt)+i,
@@ -452,7 +454,8 @@ int arch_domain_create(struct domain *d)
 
     if ( (pg = alloc_domheap_page(NULL)) == NULL )
         goto fail;
-    d->arch.mm_perdomain_l3 = clear_page(page_to_virt(pg));
+    d->arch.mm_perdomain_l3 = page_to_virt(pg);
+    clear_page(d->arch.mm_perdomain_l3);
     d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
         l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2),
                       __PAGE_HYPERVISOR);
diff --git a/xen/arch/x86/x86_64/Makefile b/xen/arch/x86/x86_64/Makefile
index 98f20a43c6..f712ca66f3 100644
--- a/xen/arch/x86/x86_64/Makefile
+++ b/xen/arch/x86/x86_64/Makefile
@@ -1,12 +1,12 @@
 subdir-y += compat
 
 obj-y += entry.o
-obj-y += compat_kexec.o
 obj-y += gpr_switch.o
 obj-y += mm.o
 obj-y += traps.o
 
 obj-$(CONFIG_COMPAT) += compat.o
+obj-$(CONFIG_COMPAT) += compat_kexec.o
 obj-$(CONFIG_COMPAT) += domain.o
 obj-$(CONFIG_COMPAT) += physdev.o
 obj-$(CONFIG_COMPAT) += platform_hypercall.o
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 444432f136..f7f7a7b802 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -106,7 +106,8 @@ void __init paging_init(void)
 
     /* Create user-accessible L2 directory to map the MPT for guests. */
     if ( (l2_pg = alloc_domheap_page(NULL)) == NULL )
        goto nomem;
-    l3_ro_mpt = clear_page(page_to_virt(l2_pg));
+    l3_ro_mpt = page_to_virt(l2_pg);
+    clear_page(l3_ro_mpt);
     l4e_write(&idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)],
               l4e_from_page(l2_pg, __PAGE_HYPERVISOR | _PAGE_USER));
@@ -132,7 +133,8 @@ void __init paging_init(void)
             if ( (l2_pg = alloc_domheap_page(NULL)) == NULL )
                 goto nomem;
             va = RO_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
-            l2_ro_mpt = clear_page(page_to_virt(l2_pg));
+            l2_ro_mpt = page_to_virt(l2_pg);
+            clear_page(l2_ro_mpt);
             l3e_write(&l3_ro_mpt[l3_table_offset(va)],
                       l3e_from_page(l2_pg, __PAGE_HYPERVISOR | _PAGE_USER));
             l2_ro_mpt += l2_table_offset(va);
@@ -152,7 +154,8 @@ void __init paging_init(void)
     l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(HIRO_COMPAT_MPT_VIRT_START)]);
     if ( (l2_pg = alloc_domheap_page(NULL)) == NULL )
         goto nomem;
-    compat_idle_pg_table_l2 = l2_ro_mpt = clear_page(page_to_virt(l2_pg));
+    compat_idle_pg_table_l2 = l2_ro_mpt = page_to_virt(l2_pg);
+    clear_page(l2_ro_mpt);
     l3e_write(&l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
               l3e_from_page(l2_pg, __PAGE_HYPERVISOR));
     l2_ro_mpt += l2_table_offset(HIRO_COMPAT_MPT_VIRT_START);
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index 681da219a3..4b4133214b 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -192,8 +192,9 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
 #define pgentry_ptr_to_slot(_p) \
     (((unsigned long)(_p) & ~PAGE_MASK) / sizeof(*(_p)))
 
-/* Page-table type. */
 #ifndef __ASSEMBLY__
+
+/* Page-table type. */
 #if CONFIG_PAGING_LEVELS == 2
 /* x86_32 default */
 typedef struct { u32 pfn; } pagetable_t;
@@ -214,9 +215,11 @@ typedef struct { u64 pfn; } pagetable_t;
 #define pagetable_from_page(pg)  pagetable_from_pfn(page_to_mfn(pg))
 #define pagetable_from_paddr(p)  pagetable_from_pfn((p)>>PAGE_SHIFT)
 #define pagetable_null()         pagetable_from_pfn(0)
-#endif
 
-#define clear_page(_p)      memset((void *)(_p), 0, PAGE_SIZE)
+void clear_page_sse2(void *);
+#define clear_page(_p)      (cpu_has_xmm2 ?                             \
+                             clear_page_sse2((void *)(_p)) :            \
+                             (void)memset((void *)(_p), 0, PAGE_SIZE))
 #define copy_page(_t,_f)    memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)
 
 #define mfn_valid(mfn)      ((mfn) < max_page)
@@ -245,6 +248,8 @@ typedef struct { u64 pfn; } pagetable_t;
 #define pfn_to_paddr(pfn)   ((paddr_t)(pfn) << PAGE_SHIFT)
 #define paddr_to_pfn(pa)    ((unsigned long)((pa) >> PAGE_SHIFT))
 
+#endif /* !defined(__ASSEMBLY__) */
+
 /* High table entries are reserved by the hypervisor. */
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
 #define DOMAIN_ENTRIES_PER_L2_PAGETABLE \
-- 
2.30.2